ia64: Some fixes after spinlock implementation changes.
author Keir Fraser <keir.fraser@citrix.com>
Tue, 21 Oct 2008 17:00:21 +0000 (18:00 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
Tue, 21 Oct 2008 17:00:21 +0000 (18:00 +0100)
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/ia64/vmx/vmmu.c
xen/include/asm-ia64/linux-xen/asm/spinlock.h
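
The first hunk switches vmx_vcpu_ptc_ga() from the removed spin_unlock_wait() to spin_barrier(); the second drops the ia64-private spin_unlock_wait(), read_can_lock() and write_can_lock() macros, which the reworked spinlock code leaves without users. As a minimal sketch of the semantics relied on here (the sketch_* names are hypothetical and this is not Xen's actual implementation): wait until no CPU holds the lock, without ever acquiring it.

    /* Compiler barrier (GCC syntax) so x->lock is re-read on every pass. */
    #define barrier()  asm volatile("" ::: "memory")

    typedef struct {
        volatile unsigned int lock;   /* 0 == unlocked, as in the header below */
    } sketch_spinlock_t;

    static inline void sketch_spin_barrier(sketch_spinlock_t *x)
    {
        do {
            barrier();                /* force a fresh read of x->lock */
        } while (x->lock != 0);       /* spin while another CPU still holds it */
    }

In the vmmu.c hunk this is what lets vmx_vcpu_ptc_ga() wait for the remote CPU's schedule_lock to fall free before sending the VHPT-flush IPI via smp_call_function_single().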

diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c
index 4e1497514b0782174bc32696269f15674f6ab3cd..3bf05290515583c6e64e5d496eb5944c4c81d090 100644
--- a/xen/arch/ia64/vmx/vmmu.c
+++ b/xen/arch/ia64/vmx/vmmu.c
@@ -446,7 +446,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
         do {
             cpu = v->processor;
             if (cpu != current->processor) {
-                spin_unlock_wait(&per_cpu(schedule_data, cpu).schedule_lock);
+                spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
                 /* Flush VHPT on remote processors. */
                 smp_call_function_single(cpu, &ptc_ga_remote_func,
                                          &args, 0, 1);
diff --git a/xen/include/asm-ia64/linux-xen/asm/spinlock.h b/xen/include/asm-ia64/linux-xen/asm/spinlock.h
index 981b6f9d64a69d99db81ac48cb4958d122c817ea..115fe6e49c40e88504a0fac7ac501fa264c953be 100644
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h
@@ -130,7 +130,6 @@ do {                                                                                        \
 #define _raw_spin_is_locked(x) ((x)->lock != 0)
 #define _raw_spin_unlock(x)    do { barrier(); (x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
 
 typedef struct {
        volatile unsigned int read_counter      : 31;
@@ -141,9 +140,6 @@ typedef struct {
 } raw_rwlock_t;
 #define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
 
-#define read_can_lock(rw)      (*(volatile int *)(rw) >= 0)
-#define write_can_lock(rw)     (*(volatile int *)(rw) == 0)
-
 #define _raw_read_lock(rw)                                                             \
 do {                                                                                   \
        raw_rwlock_t *__read_lock_ptr = (rw);                                           \